Data was imported using the data_gathering.Rmd script; see that script for details of collection.
Taking in raw data and adding a parseable timestamp while filtering on the date and client_ids.
Define functions to create posts per day of week graphs, and timeseries of engagement line graphs.
Shape data into vertical data formats.
##
## Attaching package: 'chron'
## The following objects are masked from 'package:lubridate':
##
## days, hours, minutes, seconds, years
## [1] "tbl_df" "tbl" "data.frame"
The first plot shows aggregated engagement by content type. The second plot shows engagement by type for the client (Labatt).
Looking at the engagement by content type we see that Labatt is garnering its most significant engagement on Photos, Videos, and Links.
[ ] TODO: we need to compare posting activity with engagement activity (scatter plot)
Horizontal stacked bar chart for total engagement comparison of all companies
# Convert x to a factor whose levels are ordered by ascending frequency
# (least common level first) — useful for ordered bar charts.
reorder_size <- function(x) {
  freq_asc <- sort(table(x))
  factor(x, levels = names(freq_asc))
}
# Horizontal stacked bars: total Facebook engagement per brand, split by
# engagement type. Post counts ("Total.Posts") are excluded so only
# engagement metrics are stacked.
engagement_only <- filter(summary_stats, Engagement != "Total.Posts")
p <- ggplot(engagement_only, aes(x = Company, y = Number, fill = Engagement)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  xlab('Brand') + ylab('Engagement') +
  ggtitle('Total Engagement(Facebook)')
plot(p)
Total posts per day of the week.
# One posts-per-weekday chart per client, built by the project helper
# day_of_week() (defined elsewhere). Without a brand ID in the title these
# are uninformative on their own.
# NOTE(review): assumes df_names and client_names are parallel vectors of
# equal length — confirm where they are defined.
for(i in seq_along(df_names)) {
p <- day_of_week(df_names[i], client_names[i])
plot(p)
}
# Posts per day of the week, faceted by brand; bars shaded by count.
# NOTE(review): `..count..` is the legacy stat-variable spelling; ggplot2
# >= 3.4 prefers after_stat(count) — confirm the installed version before
# modernizing.
p <- ggplot(data = all_companies_ts, aes(x = wday(timestamp, label = TRUE))) +
geom_bar(aes(fill = ..count..)) +
theme(legend.position = "none") +
xlab("Day of the Week") + ylab("Number of Posts") +
scale_fill_gradient(low = "midnightblue", high = "aquamarine4") +
facet_wrap(~from_name, ncol = 4) +
ggtitle("Daily Posting Activity by brand")
plot(p)
# Mean engagement per post for each weekday, faceted by brand.
# Rewritten with a dplyr pipeline for consistency with the rest of the file
# (was base aggregate()); the computed values are unchanged.
dowDat <- all_companies_ts %>%
  select(total_engagement, from_name, timestamp) %>%
  mutate(dow = wday(timestamp, label = TRUE)) %>%
  group_by(dow, from_name) %>%
  summarise(total_engagement = mean(total_engagement)) %>%
  ungroup()

p <- ggplot(dowDat, aes(x = dow, y = total_engagement)) +
  geom_bar(stat = "identity", aes(fill = total_engagement)) +
  facet_grid(~from_name) +
  ggtitle('Engagements Per Day of Week') +
  theme(legend.position = "none") +
  # Label fix: the y value is a per-post mean, not a count.
  xlab("Day of the Week") + ylab("Mean Engagement per Post") +
  scale_fill_gradient(low = "midnightblue", high = "aquamarine4")
plot(p)
[ ] TODO: Create a posts-vs-engagement scatter plot, to answer whether days with many posts also receive high engagement.
[ ] TODO: With that data we can ask which posts get the most engagement; compare the top- and bottom-engagement posts and the qualities they share or differ by.
[X] Time of day visual break down?
Plots for the timeseries engagement line.
# One engagement time-series plot per client via the project helper
# timeseries_engagement() (defined elsewhere).
# NOTE(review): the loop is indexed over df_names but only
# client_names_proper[i] is used — confirm the two vectors have the same
# length and ordering.
for(i in seq_along(df_names)) {
p <- timeseries_engagement(client_names_proper[i])
plot(p)
}
Test visualization; it showed a spike in engagement for Bud Light in August 2016.
# Restrict to tracked clients and bucket each post into its calendar month.
# Bug fix: the original called cut(all_companies_ts$timestamp, ...) inside
# mutate(), i.e. the *unfiltered* column; once filter() drops any rows the
# vector no longer lines up with the filtered data. Refer to the pipeline's
# own `timestamp` instead.
all_companies_ts <- all_companies_ts %>%
  filter(from_id %in% client_ids) %>%
  mutate(month = as.Date(cut(timestamp, breaks = "month")))

# Quick look: per-post engagement over time, one line per brand.
ggplot(all_companies_ts, aes(x = month, y = total_engagement)) +
  geom_line(aes(group = from_name, color = factor(from_name)))
# Monthly total engagement per brand: one point per brand-month, plus a
# smoothed trend line per brand. (summarise() keeps only the grouping
# columns and totEng, so no explicit select() is needed.)
monthly_totals <- all_companies_ts %>%
  group_by(from_name, month) %>%
  summarise(totEng = sum(total_engagement))

ggplot(monthly_totals, aes(x = month, y = totEng)) +
  geom_point(aes(color = from_name)) +
  geom_smooth(aes(color = from_name), se = FALSE)
# Same monthly-engagement view with the two dominant brands excluded so the
# remaining brands are readable on a comparable scale.
all_companies_ts %>%
  filter(!from_name %in% c("Bud Light", "Michelob ULTRA")) %>%
  group_by(from_name, month) %>%
  summarise(totEng = sum(total_engagement)) %>%
  ggplot(aes(x = month, y = totEng)) +
  geom_point(aes(color = from_name)) +
  geom_smooth(aes(color = from_name), se = FALSE) +
  ggtitle("Monthly Facebook Engagement w/o Bud & MichULTRA")
What is different about the content during this period?
Might be valuable to look back at the entire timeseries for periods of distinct dynamism.
Removed the filter because Labatt does not show a significant inflection point, unlike the brands examined in the previous analysis.
# Word cloud of Labatt's Facebook post text.
labatt$timestamp <- date(labatt$timestamp)  # coerce to Date (time-of-day dropped)

# Clean the raw message text before building the corpus.
labatt_clean_pre <- str_replace_all(labatt$message, "@\\w+", "")  # strip @handles
labatt_clean_pre <- gsub("&", "", labatt_clean_pre)               # stray ampersands
labatt_clean_pre <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", labatt_clean_pre)  # retweet markers
labatt_clean_pre <- gsub("[[:punct:]]", "", labatt_clean_pre)
labatt_clean_pre <- gsub("[[:digit:]]", "", labatt_clean_pre)
labatt_clean_pre <- gsub("http\\w+", "", labatt_clean_pre)        # URL remnants (punct already stripped)
# Bug fix: runs of whitespace must collapse to a single space, not "" —
# the old replacement fused adjacent words ("cold  beer" -> "coldbeer").
# (A second @handle gsub from the original was a no-op after the
# str_replace_all above, so it has been dropped.)
labatt_clean_pre <- gsub("[ \t]{2,}", " ", labatt_clean_pre)
labatt_clean_pre <- gsub("^\\s+|\\s+$", "", labatt_clean_pre)

labatt_corpus_pre <- Corpus(VectorSource(labatt_clean_pre))
labatt_corpus_pre <- tm_map(labatt_corpus_pre, removePunctuation)
labatt_corpus_pre <- tm_map(labatt_corpus_pre, content_transformer(tolower))
labatt_corpus_pre <- tm_map(labatt_corpus_pre, removeWords, stopwords("english"))
labatt_corpus_pre <- tm_map(labatt_corpus_pre, removeWords, c("amp", "2yo", "3yo", "4yo"))
labatt_corpus_pre <- tm_map(labatt_corpus_pre, stripWhitespace)

# Drop the four lightest shades so words stay legible on white.
pal <- brewer.pal(9, "YlGnBu")
pal <- pal[-(1:4)]
set.seed(123)  # reproducible cloud layout
wordcloud(words = labatt_corpus_pre, scale = c(5, 0.1), max.words = 25, random.order = FALSE,
rot.per = 0.35, use.r.layout = FALSE, colors = pal)
Displays engagement per post to find outliers.
# Per-post engagement over time; each point is one post, colored by brand.
# Intended for spotting outlier posts.
legend_theme <- theme(legend.title = element_blank(),
                      legend.text = element_text(size = 12),
                      legend.position = c(0.18, 0.77),
                      legend.background = element_rect(fill = alpha('gray', 0)))
p <- ggplot(all_companies_ts, aes(x = month, y = total_engagement)) +
  geom_point(aes(color = from_name)) +
  xlab("Year") + ylab("Total Engagement") +
  legend_theme
plot(p)
# q <- aggregate(all_companies_ts$total_engagement~all_companies_ts$month+
# all_companies_ts$from_name,
# FUN=sum)
#
# ggplot(q, aes(x = q$`all_companies_ts$month`, y = q$`all_companies_ts$total_engagement`)) +
# geom_line(aes(color=q$`all_companies_ts$from_name`)) +
# ylab("Total Engagement") + xlab("Year") +
# theme(legend.title=element_blank(),
# legend.text=element_text(size=12),
# legend.position=c(0.18, 0.77),
# legend.background=element_rect(fill=alpha('gray', 0)))
### Molson Content Over Time ###
# Monthly count of Molson posts, one line per content type.
# (Local renamed from `t`, which masked base::t.)
molson_counts <- all_companies_ts %>%
  filter(from_name == "Molson Canadian")
molson_counts <- data.frame(table(molson_counts$month, molson_counts$type))
molson_counts$Var1 <- date(molson_counts$Var1)  # table() stringified the month
ggplot(molson_counts, aes(x = Var1, y = Freq, group = Var2)) +
  geom_line(aes(color = Var2)) +
  # Title fix: the y axis is post frequency, not engagement.
  ggtitle('Molson Canadian Facebook Activity') +
  xlab("Year") + ylab("Post Frequency") +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.position = c(0.18, 0.77),
        legend.background = element_rect(fill = alpha('gray', 0)))
### Labatt Content Over Time ###
# Monthly count of Labatt USA posts, one line per content type.
labatt_monthly <- all_companies_ts %>%
  filter(from_name == "Labatt USA")
labatt_monthly <- as.data.frame(table(labatt_monthly$month, labatt_monthly$type))
labatt_monthly$Var1 <- date(labatt_monthly$Var1)
ggplot(labatt_monthly, aes(x = Var1, y = Freq, group = Var2)) +
  geom_line(aes(color = Var2)) +
  xlab("Year") + ylab("Post Frequency") +
  ggtitle('Labatt Facebook Activity') +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.position = c(0.18, 0.77),
        legend.background = element_rect(fill = alpha('gray', 0)))
### Michelob ULTRA Content Over Time ###
# (Comment fix: previously labeled "Labatt".)
# Monthly count of Michelob ULTRA posts, one line per content type.
michelob_counts <- all_companies_ts %>%
  filter(from_name == "Michelob ULTRA")
michelob_counts <- data.frame(table(michelob_counts$month, michelob_counts$type))
michelob_counts$Var1 <- date(michelob_counts$Var1)  # table() stringified the month
ggplot(michelob_counts, aes(x = Var1, y = Freq, group = Var2)) +
  geom_line(aes(color = Var2)) +
  # Title fix: the y axis is post frequency, not engagement.
  ggtitle('Michelob ULTRA Facebook Activity') +
  xlab("Year") + ylab("Post Frequency") +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.position = c(0.18, 0.77),
        legend.background = element_rect(fill = alpha('gray', 0)))
### Bud Light Content Over Time ###
# (Comment fix: previously labeled "Labatt".)
# Monthly count of Bud Light posts, one line per content type.
bud_counts <- all_companies_ts %>%
  filter(from_name == "Bud Light")
bud_counts <- data.frame(table(bud_counts$month, bud_counts$type))
bud_counts$Var1 <- date(bud_counts$Var1)  # table() stringified the month
ggplot(bud_counts, aes(x = Var1, y = Freq, group = Var2)) +
  geom_line(aes(color = Var2)) +
  # Title fix: the y axis is post frequency, not engagement.
  ggtitle('Bud Light Facebook Activity') +
  xlab("Year") + ylab("Post Frequency") +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.position = c(0.18, 0.77),
        legend.background = element_rect(fill = alpha('gray', 0)))
# LabattUSA_timeline %>%
# filter()
#
#
# tweets <- LabattUSA_timeline$text
# match <- regmatches(tweets,gregexpr("#[[:alnum:]]+",tweets))
#
# # Convert the list to a corpus
# # new_corpus <- as.VCorpus(new_list) from Stackoverflow (http://stackoverflow.com/questions/34061912/how-transform-a-list-into-a-corpus-in-r)
#
# new_corpus <- as.VCorpus(match)
# class(new_corpus)
# inspect(new_corpus)
#
# EnsurePackage <- function(x) {
# # EnsurePackage(x) - Installs and loads a package if necessary
# # Args:
# # x: name of package
#
# x <- as.character(x)
# if (!require(x, character.only=TRUE)) {
# install.packages(pkgs=x, repos="http://cran.r-project.org")
# require(x, character.only=TRUE)
# }
# }
#
# MakeWordCloud <- function(corpus) {
# # Make a word cloud
# #
# # Args:
# # textVec: a text vector
# #
# # Returns:
# # A word cloud created from the text vector
#
# EnsurePackage("tm")
# EnsurePackage("wordcloud")
# EnsurePackage("RColorBrewer")
#
# corpus <- tm_map(corpus, function(x) {
# removeWords(x, c("via", "rt", "mt"))
# })
#
# ap.tdm <- TermDocumentMatrix(corpus)
# ap.m <- as.matrix(ap.tdm)
# ap.v <- sort(rowSums(ap.m), decreasing=TRUE)
# ap.d <- data.frame(word = names(ap.v), freq=ap.v)
# table(ap.d$freq)
# pal2 <- brewer.pal(8, "Dark2")
#
# wordcloud(ap.d$word, ap.d$freq,
# scale=c(8, .2), min.freq = 3,
# max.words = Inf, random.order = FALSE,
# rot.per = .15, colors = pal2)
# }
#
# MakeWordCloud(new_corpus)
# p <- unfiltered_ts %>%
# summarise(jd = doy(timestamp)) %>%
# group_by(jd) %>%
# ggplot(aes(factor(jd),total_engagement)) +
# geom_boxplot() +
# facet_grid(~ from_name)
# plot(p)
[ ] Create a data.frame with these columns: brand, date, tweet, engagement (likely a subset of all_companies).
[ ] summary table of brand, month, totEng, see examples:http://leonawicz.github.io/HtmlWidgetExamples/ex_dt_sparkline.html
# Does more posting buy more engagement? Monthly post count vs total
# engagement per brand on log-log axes, with one linear fit per brand.
all_companies_ts %>%
  select(from_name, timestamp, total_engagement) %>%
  group_by(from_name, month(timestamp), year(timestamp)) %>%
  summarise(count = n(),
            engagement = sum(total_engagement)) %>%
  ggplot(., aes(y = log(engagement), x = log(count), colour = from_name)) +
  geom_point() +
  geom_smooth(se = FALSE, method = "lm") +
  # geom_smooth(se = FALSE)  # loess alternative, kept for reference
  # Typo fix in the displayed title ("Acitivity" -> "Activity").
  ggtitle("Engagement vs Post Activity")
# Same posts-vs-engagement relationship on linear axes, with proper axis
# labels. The commented filters can re-exclude the two dominant brands.
all_companies_ts %>%
  #filter(from_name != "Bud Light" ) %>%
  #filter(from_name != "Michelob ULTRA") %>%
  select(from_name, timestamp, total_engagement) %>%
  group_by(from_name, month(timestamp), year(timestamp)) %>%
  summarise(count = n(),
            engagement = sum(total_engagement)) %>%
  ggplot(., aes(y = engagement, x = count, colour = from_name)) +
  geom_point() +
  geom_smooth(se = FALSE, method = "lm") +
  # Typo fix in the displayed title ("Acitivity" -> "Activity").
  ggtitle("Engagement vs Post Activity") +
  ylab("Total Engagement") + xlab("Total Monthly Posts")
There is a positive relationship between post activity (i.e., monthly post counts) and total engagement.
[ ] TOD vs engagement similar to post activity vs Engagement
# load('processed_data/bud_fb.RData')
# bud$total_engagement <- rowSums(bud[,9:11])
# z <- bud %>%
# arrange(desc(total_engagement))
# head(z)
# Updated upstream
# Normalize raw social-media text for sentiment scoring: strips @handles,
# retweet markers, ampersands, punctuation, digits, URL remnants, and
# surplus whitespace. Vectorized: accepts and returns a character vector.
text_clean <- function(cleanliness) {
  cleanliness <- str_replace_all(cleanliness, "@\\w+", "")  # @handles
  cleanliness <- gsub("&", "", cleanliness)
  cleanliness <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", cleanliness)  # retweet markers
  # (The original's second "@\\w+" pass was a no-op after the
  # str_replace_all above, so it has been dropped.)
  cleanliness <- gsub("[[:punct:]]", "", cleanliness)
  cleanliness <- gsub("[[:digit:]]", "", cleanliness)
  cleanliness <- gsub("http\\w+", "", cleanliness)  # URL remnants (punct already stripped)
  # Bug fix: collapse whitespace runs to a single space, not "" — the old
  # replacement fused adjacent words ("cold  beer" -> "coldbeer"), which
  # distorted downstream sentiment and word-frequency results.
  cleanliness <- gsub("[ \t]{2,}", " ", cleanliness)
  cleanliness <- gsub("^\\s+|\\s+$", "", cleanliness)
  return(cleanliness)
}
# Score each Labatt tweet: per-emotion NRC counts (a list column holding one
# data.frame per tweet) plus an overall numeric sentiment score.
# NOTE(review): as.character() on the list column flattens each NRC
# data.frame into a string — verify this is the intended representation.
LabattUSA_timeline$sentiment <- lapply(text_clean(LabattUSA_timeline$text), get_nrc_sentiment)
labatt_sentiment <- data.frame('created' = LabattUSA_timeline$created,
'text' = LabattUSA_timeline$text,
'sentiment' = as.character(LabattUSA_timeline$sentiment))
labatt_sentiment$score <- get_sentiment(as.character(text_clean(labatt_sentiment$text))) %>% as.numeric()
# Five most negative tweets (sorted descending, so tail() gives the lowest).
labatt_sentiment %>%
arrange(desc(score)) %>%
select(created, score) %>%
tail(5)
## created score
## 704 2016-03-14 20:38:19 -1.50
## 705 2015-07-16 16:18:59 -1.50
## 706 2016-01-10 04:59:26 -1.75
## 707 2015-06-04 01:44:07 -2.50
## 708 2015-04-09 17:54:52 -2.50
# Labatt sentiment over time: raw score line with a smoothed trend overlay.
ggplot(labatt_sentiment, aes(as_date(created), score)) +
  geom_line(size = 1) +
  geom_smooth() +
  scale_color_manual(values = colourList) +
  scale_x_date(breaks = date_breaks("3 months"),
               labels = date_format("%Y-%b")) +
  scale_y_continuous(name = "Sentiment\n", breaks = seq(-5, 5, by = 1)) +
  theme_bw() +
  ggtitle('Labatt Sentiment')
# Score each Molson tweet: per-emotion NRC counts (a list column holding one
# data.frame per tweet) plus an overall numeric sentiment score.
# NOTE(review): as.character() on the list column flattens each NRC
# data.frame into a string — verify this is the intended representation.
Molson_Canadian_timeline$sentiment <- lapply(text_clean(Molson_Canadian_timeline$text), get_nrc_sentiment)
molson_sentiment <- data.frame('created' = Molson_Canadian_timeline$created,
'text' = Molson_Canadian_timeline$text,
'sentiment' = as.character(Molson_Canadian_timeline$sentiment))
molson_sentiment$score <- get_sentiment(as.character(text_clean(molson_sentiment$text))) %>% as.numeric()
# Five most negative tweets (sorted descending, so tail() gives the lowest).
molson_sentiment %>%
arrange(desc(score)) %>%
select(created, score) %>%
tail(5)
## created score
## 273 2016-09-04 03:18:47 -0.75
## 274 2016-07-14 13:45:27 -0.75
## 275 2016-01-04 23:05:09 -1.00
## 276 2016-02-07 20:14:09 -1.25
## 277 2016-06-07 13:52:23 -1.50
# Molson sentiment over time: raw score line with a smoothed trend overlay.
ggplot(molson_sentiment, aes(as_date(created), score)) +
  geom_line(size = 1) +
  geom_smooth() +
  scale_color_manual(values = colourList) +
  scale_x_date(breaks = date_breaks("3 months"),
               labels = date_format("%Y-%b")) +
  scale_y_continuous(name = "Sentiment\n", breaks = seq(-5, 5, by = 1)) +
  theme_bw() +
  ggtitle('Molson Sentiment')
# Score each Bud Light tweet: per-emotion NRC counts (a list column holding
# one data.frame per tweet) plus an overall numeric sentiment score.
# NOTE(review): as.character() on the list column flattens each NRC
# data.frame into a string — verify this is the intended representation.
budlight_timeline$sentiment <- lapply(text_clean(budlight_timeline$text), get_nrc_sentiment)
budlight_sentiment <- data.frame('created' = budlight_timeline$created,
'text' = budlight_timeline$text,
'sentiment' = as.character(budlight_timeline$sentiment))
budlight_sentiment$score <- get_sentiment(as.character(text_clean(budlight_sentiment$text))) %>% as.numeric()
# Five most negative tweets (sorted descending, so tail() gives the lowest).
budlight_sentiment %>%
arrange(desc(score)) %>%
select(created, score) %>%
tail(5)
## created score
## 3175 2016-09-25 23:11:45 -0.75
## 3176 2016-09-18 20:49:26 -0.75
## 3177 2016-08-19 13:20:11 -0.75
## 3178 2016-09-27 00:16:40 -1.00
## 3179 2016-08-12 15:01:09 -1.00
# Bud Light sentiment over time: raw score line with a smoothed trend overlay.
ggplot(budlight_sentiment, aes(as_date(created), score)) +
  geom_line(size = 1) +
  geom_smooth() +
  scale_color_manual(values = colourList) +
  scale_x_date(breaks = date_breaks("3 months"),
               labels = date_format("%Y-%b")) +
  scale_y_continuous(name = "Sentiment\n", breaks = seq(-5, 5, by = 1)) +
  theme_bw() +
  ggtitle('BudLight Sentiment')
# Score each Michelob ULTRA tweet: per-emotion NRC counts (a list column
# holding one data.frame per tweet) plus an overall numeric sentiment score.
# NOTE(review): as.character() on the list column flattens each NRC
# data.frame into a string — verify this is the intended representation.
MichelobULTRA_timeline$sentiment <- lapply(text_clean(MichelobULTRA_timeline$text), get_nrc_sentiment)
michelob_sentiment <- data.frame('created' = MichelobULTRA_timeline$created,
'text' = MichelobULTRA_timeline$text,
'sentiment' = as.character(MichelobULTRA_timeline$sentiment))
michelob_sentiment$score <- get_sentiment(as.character(text_clean(michelob_sentiment$text))) %>% as.numeric()
# Five most negative tweets (sorted descending, so tail() gives the lowest).
michelob_sentiment %>%
arrange(desc(score)) %>%
select(created, score) %>%
tail(5)
## created score
## 2751 2014-04-14 17:01:02 -0.80
## 2752 2014-02-06 00:05:27 -0.85
## 2753 2016-02-24 20:10:22 -1.00
## 2754 2014-07-23 18:01:13 -1.00
## 2755 2015-03-13 22:52:57 -1.15
# Michelob sentiment over time: raw score line with a smoothed trend overlay.
ggplot(michelob_sentiment, aes(as_date(created), score)) +
  geom_line(size = 1) +
  geom_smooth() +
  scale_color_manual(values = colourList) +
  scale_x_date(breaks = date_breaks("3 months"),
               labels = date_format("%Y-%b")) +
  scale_y_continuous(name = "Sentiment\n", breaks = seq(-5, 5, by = 1)) +
  theme_bw() +
  ggtitle('Michelob Sentiment')